Export symbols needed by Android drivers
authorBen Hutchings <ben@decadent.org.uk>
Tue, 26 Jun 2018 15:59:01 +0000 (16:59 +0100)
committerBen Hutchings <ben@decadent.org.uk>
Tue, 19 Nov 2019 01:43:33 +0000 (01:43 +0000)
We want to enable use of the Android ashmem and binder drivers to
support Anbox, but they should not be built-in as that would waste
resources and increase the security attack surface on systems that
don't need them.

Export the currently un-exported symbols they depend on.

Gbp-Pq: Topic debian
Gbp-Pq: Name export-symbols-needed-by-android-drivers.patch

fs/file.c
kernel/fork.c
kernel/sched/core.c
kernel/signal.c
kernel/task_work.c
mm/memory.c
mm/shmem.c
mm/vmalloc.c
security/security.c

index 3da91a112babe874af392635a32e971d8885937f..fbb6f559a717377be43062462cd97b6a56423aed 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -409,6 +409,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
 
        return files;
 }
+EXPORT_SYMBOL_GPL(get_files_struct);
 
 void put_files_struct(struct files_struct *files)
 {
@@ -421,6 +422,7 @@ void put_files_struct(struct files_struct *files)
                kmem_cache_free(files_cachep, files);
        }
 }
+EXPORT_SYMBOL_GPL(put_files_struct);
 
 void reset_files_struct(struct files_struct *files)
 {
@@ -534,6 +536,7 @@ out:
        spin_unlock(&files->file_lock);
        return error;
 }
+EXPORT_SYMBOL_GPL(__alloc_fd);
 
 static int alloc_fd(unsigned start, unsigned flags)
 {
@@ -607,6 +610,7 @@ void __fd_install(struct files_struct *files, unsigned int fd,
        rcu_assign_pointer(fdt->fd[fd], file);
        rcu_read_unlock_sched();
 }
+EXPORT_SYMBOL_GPL(__fd_install);
 
 void fd_install(unsigned int fd, struct file *file)
 {
@@ -669,6 +673,7 @@ out_unlock:
        *res = NULL;
        return -ENOENT;
 }
+EXPORT_SYMBOL(__close_fd_get_file);
 
 void do_close_on_exec(struct files_struct *files)
 {
index 3647097e67835ca8abe81f3f0fdf39514db088cb..1bffa509844324bf2a3dd20bacd6bcf710dcdd2b 100644 (file)
@@ -1102,6 +1102,7 @@ void mmput_async(struct mm_struct *mm)
                schedule_work(&mm->async_put_work);
        }
 }
+EXPORT_SYMBOL_GPL(mmput_async);
 #endif
 
 /**
index fffe790d98bb28dfd63a146ce71653fc3ef24101..e37a4dfe5d0351a8de9a0a3f11ffdd9f17e9625e 100644 (file)
@@ -4403,6 +4403,7 @@ int can_nice(const struct task_struct *p, const int nice)
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
 }
+EXPORT_SYMBOL_GPL(can_nice);
 
 #ifdef __ARCH_WANT_SYS_NICE
 
index 534fec266a334b299846034e878c271619fe5c71..2501b4a61c0e8741bad904acadaee783cf540779 100644 (file)
@@ -1391,6 +1391,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 
        return sighand;
 }
+EXPORT_SYMBOL_GPL(__lock_task_sighand);
 
 /*
  * send signal info to all the members of a group
index 0fef395662a6ea6f38301e92f74d14f60b3f145d..9233b200c098579ac89ee7c91ffbb0c545a509a8 100644 (file)
@@ -40,6 +40,7 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
                set_notify_resume(task);
        return 0;
 }
+EXPORT_SYMBOL(task_work_add);
 
 /**
  * task_work_cancel - cancel a pending work added by task_work_add()
index e2bb51b6242e5814896b79cce637008f73a073bb..055ed4728fedcda33a0facebcb9b9d20f41db69d 100644 (file)
@@ -1335,6 +1335,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        mmu_notifier_invalidate_range_end(&range);
        tlb_finish_mmu(&tlb, start, range.end);
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_page_range_single - remove user pages in a given range
index 2bed4761f2795695b2e970c304f8985aeacef9f6..6f494dbcfccc55a657550dd445dc1591ccdfb038 100644 (file)
@@ -4073,6 +4073,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(shmem_zero_setup);
 
 /**
  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
index 7ba11e12a11f33b38a007a038bbfc7dee2fe48b3..885f9c944191cb59ff8de4d23ebb779c98e95c00 100644 (file)
@@ -1956,6 +1956,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
 {
        return vmap_page_range_noflush(addr, addr + size, prot, pages);
 }
+EXPORT_SYMBOL_GPL(map_kernel_range_noflush);
 
 /**
  * unmap_kernel_range_noflush - unmap kernel VM area
@@ -2098,6 +2099,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
                                  NUMA_NO_NODE, GFP_KERNEL,
                                  __builtin_return_address(0));
 }
+EXPORT_SYMBOL_GPL(get_vm_area);
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
                                const void *caller)
index 250ee2d76406d76b5746259fe76dbbed9f51f13b..18c244f449e025922b9b84ab6ba2efd5114524fd 100644 (file)
@@ -640,24 +640,28 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
 {
        return call_int_hook(binder_set_context_mgr, 0, mgr);
 }
+EXPORT_SYMBOL_GPL(security_binder_set_context_mgr);
 
 int security_binder_transaction(struct task_struct *from,
                                struct task_struct *to)
 {
        return call_int_hook(binder_transaction, 0, from, to);
 }
+EXPORT_SYMBOL_GPL(security_binder_transaction);
 
 int security_binder_transfer_binder(struct task_struct *from,
                                    struct task_struct *to)
 {
        return call_int_hook(binder_transfer_binder, 0, from, to);
 }
+EXPORT_SYMBOL_GPL(security_binder_transfer_binder);
 
 int security_binder_transfer_file(struct task_struct *from,
                                  struct task_struct *to, struct file *file)
 {
        return call_int_hook(binder_transfer_file, 0, from, to, file);
 }
+EXPORT_SYMBOL_GPL(security_binder_transfer_file);
 
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
 {